This assignment is based on this 2D object detection tutorial, which uses PyTorch to implement the SSD network for detecting objects in images from the VOC dataset. https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection
Only the mount portion has to be run if you already have the dataset and the JSON files downloaded.
First we mount our Google Drive.
# # TODO: Done: set runtime to GPU
# from google.colab import drive
# drive.mount('/content/gdrive')
# # Go to your assignment directory
# %cd /content/gdrive/MyDrive/'Colab Notebooks'/ece495_assignment4/
Next, download the VOC 2007 dataset. This takes about 6.2 minutes.
# import requests
# import tarfile
# import io
# import time
# def download_and_unzip(url, path):
# dl_start = time.time()
# r = requests.get(url)
# dl_end = time.time()
# print("download time elapsed:", dl_end - dl_start)
# tar = tarfile.TarFile(fileobj=io.BytesIO(r.content))
# # extract the contents of VOC2007
# extract_start = time.time()
# subdir_and_files = [
# tarinfo for tarinfo in tar.getmembers()
# if tarinfo.name.startswith("VOCdevkit/VOC2007/")
# ]
# tar.extractall(path=path, members=subdir_and_files)
# extract_end = time.time()
# print("extract time elapsed:", extract_end - extract_start)
# # Go to your assignment directory
# %cd /content/gdrive/MyDrive/'Colab Notebooks'/ece495_assignment4/
# start = time.time()
# download_and_unzip(
# "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar",
# "/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4"
# )
# download_and_unzip(
# "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar",
# "/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4"
# )
# end = time.time()
# print("total time elapsed:", end - start)
Sync the data to your Google Drive. This should take about 33 minutes. You must restart the runtime after this by clicking Runtime -> Restart runtime.
# start = time.time()
# drive.flush_and_unmount()
# end = time.time()
# print("total time elapsed:", end - start)
Check that the data is downloaded and that you have the JSON files. This also remounts the Google Drive.
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
# Go to your assignment directory
%cd /content/gdrive/MyDrive/'Colab Notebooks'/ece495_assignment4/
# Check location
!ls
## You should have this output:
# /content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4
# ece495_assignment4.ipynb utils.py VOCdevkit
# You should also have the json files
# and also the checkpoint if you have already trained the model
Mounted at /content/gdrive
/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4
ece495_a4_tabdelme.ipynb  __pycache__  TRAIN_images.json  VOCdevkit  ece495_a4_unsolved.ipynb  TEST_images.json  TRAIN_objects.json  label_map.json  TEST_objects.json  utils.py
This code does not have to be run; the files it creates are provided with the assignment. It creates the JSON files label_map.json, TRAIN_images.json, TRAIN_objects.json, TEST_images.json, and TEST_objects.json. These contain the image paths, the ground-truth object information, and the label-to-number mapping. This should take about 45 minutes if the data has not been cached.
# from utils import create_data_lists
# import time
# start = time.time()
# create_data_lists(voc07_path='/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4/VOCdevkit/VOC2007',
# voc12_path=None, # Removed VOC 2012 to reduce data size requirement of this assignment
# output_folder='./')
# end = time.time()
# print("time elapsed:", end - start)
Next, the Dataset loader for VOC is implemented.
import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from utils import transform
class PascalVOCDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
"""
def __init__(self, data_folder, split, keep_difficult=False):
"""
:param data_folder: folder where data files are stored
:param split: split, one of 'TRAIN' or 'TEST'
:param keep_difficult: keep or discard objects that are considered difficult to detect?
"""
self.split = split.upper()
assert self.split in {'TRAIN', 'TEST'}
self.data_folder = data_folder
self.keep_difficult = keep_difficult
# Read data files
with open(os.path.join(data_folder, self.split + '_images.json'), 'r') as j:
self.images = json.load(j)
with open(os.path.join(data_folder, self.split + '_objects.json'), 'r') as j:
self.objects = json.load(j)
assert len(self.images) == len(self.objects)
def __getitem__(self, i):
# Read image
image = Image.open(self.images[i], mode='r')
image = image.convert('RGB')
# Read objects in this image (bounding boxes, labels, difficulties)
objects = self.objects[i]
boxes = torch.FloatTensor(objects['boxes']) # (n_objects, 4)
labels = torch.LongTensor(objects['labels']) # (n_objects)
difficulties = torch.ByteTensor(objects['difficulties']) # (n_objects)
# Discard difficult objects, if desired
if not self.keep_difficult:
boxes = boxes[1 - difficulties]
labels = labels[1 - difficulties]
difficulties = difficulties[1 - difficulties]
# Apply transformations
image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, split=self.split)
return image, boxes, labels, difficulties
def __len__(self):
return len(self.images)
def collate_fn(self, batch):
"""
Since each image may have a different number of objects, we need a collate function (to be passed to the DataLoader).
This describes how to combine these tensors of different sizes. We use lists.
Note: this need not be defined in this class; it can be standalone.
:param batch: an iterable of N sets from __getitem__()
:return: a tensor of images, lists of varying-size tensors of bounding boxes, labels, and difficulties
"""
images = list()
boxes = list()
labels = list()
difficulties = list()
for b in batch:
images.append(b[0])
boxes.append(b[1])
labels.append(b[2])
difficulties.append(b[3])
images = torch.stack(images, dim=0)
return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each
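A minimal usage sketch (the batch size here is illustrative, not the training setting used later): build the TRAIN split and pull one batch through the custom collate function.
from torch.utils.data import DataLoader
demo_dataset = PascalVOCDataset('./', split='TRAIN', keep_difficult=True)
demo_loader = DataLoader(demo_dataset, batch_size=4, shuffle=True,
                         collate_fn=demo_dataset.collate_fn)
images, boxes, labels, difficulties = next(iter(demo_loader))
print(images.shape)                 # torch.Size([4, 3, 300, 300])
print(len(boxes), boxes[0].shape)   # 4, and each entry is (n_objects, 4)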
First, we create the base (encoder) part of the network.
You must fill in the ResNet code.
from torch import nn
from utils import *
import torch.nn.functional as F
from math import sqrt
from itertools import product as product
import torchvision
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class VGGBase(nn.Module):
"""
VGG base convolutions to produce lower-level feature maps.
"""
def __init__(self):
super(VGGBase, self).__init__()
# Standard convolutional layers in VGG16
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) # stride = 1, by default
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) # ceiling (not floor) here for even dims
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) # retains size because stride is 1 (and padding)
# Replacements for FC6 and FC7 in VGG16
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) # atrous convolution
self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
# Load pretrained layers
self.load_pretrained_layers()
def forward(self, image):
"""
Forward propagation.
:param image: images, a tensor of dimensions (N, 3, 300, 300)
:return: lower-level feature maps conv4_3 and conv7
"""
out = F.relu(self.conv1_1(image)) # (N, 64, 300, 300)
out = F.relu(self.conv1_2(out)) # (N, 64, 300, 300)
out = self.pool1(out) # (N, 64, 150, 150)
out = F.relu(self.conv2_1(out)) # (N, 128, 150, 150)
out = F.relu(self.conv2_2(out)) # (N, 128, 150, 150)
out = self.pool2(out) # (N, 128, 75, 75)
out = F.relu(self.conv3_1(out)) # (N, 256, 75, 75)
out = F.relu(self.conv3_2(out)) # (N, 256, 75, 75)
out = F.relu(self.conv3_3(out)) # (N, 256, 75, 75)
out = self.pool3(out) # (N, 256, 38, 38), it would have been 37 if not for ceil_mode = True
out = F.relu(self.conv4_1(out)) # (N, 512, 38, 38)
out = F.relu(self.conv4_2(out)) # (N, 512, 38, 38)
out = F.relu(self.conv4_3(out)) # (N, 512, 38, 38)
conv4_3_feats = out # (N, 512, 38, 38)
out = self.pool4(out) # (N, 512, 19, 19)
out = F.relu(self.conv5_1(out)) # (N, 512, 19, 19)
out = F.relu(self.conv5_2(out)) # (N, 512, 19, 19)
out = F.relu(self.conv5_3(out)) # (N, 512, 19, 19)
out = self.pool5(out) # (N, 512, 19, 19), pool5 does not reduce dimensions
out = F.relu(self.conv6(out)) # (N, 1024, 19, 19)
conv7_feats = F.relu(self.conv7(out)) # (N, 1024, 19, 19)
# Lower-level feature maps
return conv4_3_feats, conv7_feats
def load_pretrained_layers(self):
"""
As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
We copy these parameters into our network. It's straightforward for conv1 to conv5.
However, the original VGG-16 does not contain the conv6 and conv7 layers.
Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
"""
# Current state of base
state_dict = self.state_dict()
param_names = list(state_dict.keys())
# Pretrained VGG base
pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
pretrained_param_names = list(pretrained_state_dict.keys())
# Transfer conv. parameters from pretrained model to current model
for i, param in enumerate(param_names[:-4]): # excluding conv6 and conv7 parameters
state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]
# Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7
# fc6
conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7) # (4096, 512, 7, 7)
conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] # (4096)
state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) # (1024, 512, 3, 3)
state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) # (1024)
# fc7
conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1) # (4096, 4096, 1, 1)
conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] # (4096)
state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) # (1024, 1024, 1, 1)
state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) # (1024)
# Note: an FC layer of size (K) operating on a flattened version (C*H*W) of a 2D image of size (C, H, W)...
# ...is equivalent to a convolutional layer with kernel size (H, W), input channels C, output channels K...
# ...operating on the 2D image of size (C, H, W) without padding
self.load_state_dict(state_dict)
print("\nLoaded base model.\n")
class ResNetBase(nn.Module):
"""
ResNet base convolutions to produce lower-level feature maps.
"""
def __init__(self):
super(ResNetBase, self).__init__()
# TODO: Done: Load pretrained resnet 50 model (for feature extraction)
self.resnet = torchvision.models.resnet50(pretrained=True)
def forward(self, image):
"""
Forward propagation.
:param image: images, a tensor of dimensions (N, 3, 300, 300)
:return: lower-level feature maps
"""
# TODO: Done: Add your code
# The shapes in the following comments are training data example sizes
### Layer 0 ###
x = self.resnet.conv1(image) # (N, 64, 150, 150)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x) # (N, 64, 75, 75)
### Layer 1 ###
x = self.resnet.layer1(x) # (N, 256, 75, 75)
### Layer 2 ###
conv_512_feats = self.resnet.layer2(x) # (N, 512, 38, 38)
### Layer 3 ###
conv_1024_feats = self.resnet.layer3(conv_512_feats) # (N, 1024, 19, 19)
# Lower-level feature maps
return conv_512_feats, conv_1024_feats
print(torchvision.models.resnet50(pretrained=True).layer2[0])
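A quick shape check for the ResNet base (optional, run with a dummy input): the two returned feature maps should match the sizes the SSD head expects.
with torch.no_grad():
    f512, f1024 = ResNetBase()(torch.randn(1, 3, 300, 300))
print(f512.shape, f1024.shape)  # torch.Size([1, 512, 38, 38]) torch.Size([1, 1024, 19, 19])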
The base layers produce the low-level feature maps with 512 and 1024 channels. Now the higher-level feature maps with 512, 256, 256, and 256 channels are created.
class AuxiliaryConvolutions(nn.Module):
"""
Additional convolutions to produce higher-level feature maps.
"""
def __init__(self):
super(AuxiliaryConvolutions, self).__init__()
# Auxiliary/additional convolutions on top of the VGG base
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) # stride = 1, by default
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) # dim. reduction because stride > 1
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) # dim. reduction because stride > 1
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) # dim. reduction because padding = 0
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) # dim. reduction because padding = 0
# Initialize convolutions' parameters
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.)
def forward(self, conv7_feats):
"""
Forward propagation.
:param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
"""
out = F.relu(self.conv8_1(conv7_feats)) # (N, 256, 19, 19)
out = F.relu(self.conv8_2(out)) # (N, 512, 10, 10)
conv8_2_feats = out # (N, 512, 10, 10)
out = F.relu(self.conv9_1(out)) # (N, 128, 10, 10)
out = F.relu(self.conv9_2(out)) # (N, 256, 5, 5)
conv9_2_feats = out # (N, 256, 5, 5)
out = F.relu(self.conv10_1(out)) # (N, 128, 5, 5)
out = F.relu(self.conv10_2(out)) # (N, 256, 3, 3)
conv10_2_feats = out # (N, 256, 3, 3)
out = F.relu(self.conv11_1(out)) # (N, 128, 3, 3)
conv11_2_feats = F.relu(self.conv11_2(out)) # (N, 256, 1, 1)
# Higher-level feature maps
return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
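Another optional shape check: feeding a dummy conv7-sized map through the auxiliary convolutions should produce the four higher-level feature map sizes.
with torch.no_grad():
    aux = AuxiliaryConvolutions()
    f8, f9, f10, f11 = aux(torch.randn(1, 1024, 19, 19))
print(f8.shape, f9.shape, f10.shape, f11.shape)
# torch.Size([1, 512, 10, 10]) torch.Size([1, 256, 5, 5]) torch.Size([1, 256, 3, 3]) torch.Size([1, 256, 1, 1])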
At this point we have our 6 feature maps.
The low-level feature maps: (N, 512, 38, 38), (N, 1024, 19, 19)
The high-level feature maps: (N, 512, 10, 10), (N, 256, 5, 5), (N, 256, 3, 3), (N, 256, 1, 1)
Each prior box requires a classification output with one score per class, as well as the 4 regressed box location values. These convolutions are created in the init function.
In the forward pass, all the convolutions are applied to their respective input feature maps. The resulting tensors are then reshaped and concatenated so that the classification output has shape (N, 8732, n_classes) and the box output has shape (N, 8732, 4). This format is easier to work with when the network output is passed to the loss function during training or through NMS during testing.
class PredictionConvolutions(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
A high score for 'background' = no object.
"""
def __init__(self, n_classes):
"""
:param n_classes: number of different types of objects
"""
super(PredictionConvolutions, self).__init__()
self.n_classes = n_classes
# Number of prior-boxes we are considering per position in each feature map
n_boxes = {'conv4_3': 4,
'conv7': 6,
'conv8_2': 6,
'conv9_2': 6,
'conv10_2': 4,
'conv11_2': 4}
# 4 prior-boxes implies we use 4 different aspect ratios, etc.
# Localization prediction convolutions (predict offsets w.r.t prior-boxes)
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1)
self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1)
# Class prediction convolutions (predict classes in localization boxes)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1)
# Initialize convolutions' parameters
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.)
def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
"""
Forward propagation.
:param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
:param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
:param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
:param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
:param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
batch_size = conv4_3_feats.size(0)
# Predict localization boxes' bounds (as offsets w.r.t prior-boxes)
l_conv4_3 = self.loc_conv4_3(conv4_3_feats) # (N, 16, 38, 38)
l_conv4_3 = l_conv4_3.permute(0, 2, 3,
1).contiguous() # (N, 38, 38, 16), to match prior-box order (after .view())
# (.contiguous() ensures it is stored in a contiguous chunk of memory, needed for .view() below)
l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) # (N, 5776, 4), there are a total 5776 boxes on this feature map
l_conv7 = self.loc_conv7(conv7_feats) # (N, 24, 19, 19)
l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() # (N, 19, 19, 24)
l_conv7 = l_conv7.view(batch_size, -1, 4) # (N, 2166, 4), there are a total of 2166 boxes on this feature map
l_conv8_2 = self.loc_conv8_2(conv8_2_feats) # (N, 24, 10, 10)
l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() # (N, 10, 10, 24)
l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) # (N, 600, 4)
l_conv9_2 = self.loc_conv9_2(conv9_2_feats) # (N, 24, 5, 5)
l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() # (N, 5, 5, 24)
l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) # (N, 150, 4)
l_conv10_2 = self.loc_conv10_2(conv10_2_feats) # (N, 16, 3, 3)
l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() # (N, 3, 3, 16)
l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) # (N, 36, 4)
l_conv11_2 = self.loc_conv11_2(conv11_2_feats) # (N, 16, 1, 1)
l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() # (N, 1, 1, 16)
l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) # (N, 4, 4)
# Predict classes in localization boxes
c_conv4_3 = self.cl_conv4_3(conv4_3_feats) # (N, 4 * n_classes, 38, 38)
c_conv4_3 = c_conv4_3.permute(0, 2, 3,
1).contiguous() # (N, 38, 38, 4 * n_classes), to match prior-box order (after .view())
c_conv4_3 = c_conv4_3.view(batch_size, -1,
self.n_classes) # (N, 5776, n_classes), there are a total 5776 boxes on this feature map
c_conv7 = self.cl_conv7(conv7_feats) # (N, 6 * n_classes, 19, 19)
c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() # (N, 19, 19, 6 * n_classes)
c_conv7 = c_conv7.view(batch_size, -1,
self.n_classes) # (N, 2166, n_classes), there are a total of 2166 boxes on this feature map
c_conv8_2 = self.cl_conv8_2(conv8_2_feats) # (N, 6 * n_classes, 10, 10)
c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() # (N, 10, 10, 6 * n_classes)
c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) # (N, 600, n_classes)
c_conv9_2 = self.cl_conv9_2(conv9_2_feats) # (N, 6 * n_classes, 5, 5)
c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() # (N, 5, 5, 6 * n_classes)
c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) # (N, 150, n_classes)
c_conv10_2 = self.cl_conv10_2(conv10_2_feats) # (N, 4 * n_classes, 3, 3)
c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous() # (N, 3, 3, 4 * n_classes)
c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) # (N, 36, n_classes)
c_conv11_2 = self.cl_conv11_2(conv11_2_feats) # (N, 4 * n_classes, 1, 1)
c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() # (N, 1, 1, 4 * n_classes)
c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) # (N, 4, n_classes)
# A total of 8732 boxes
# Concatenate in this specific order (i.e. must match the order of the prior-boxes)
locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2], dim=1) # (N, 8732, 4)
classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2],
dim=1) # (N, 8732, n_classes)
return locs, classes_scores
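As a quick check of the 8732 figure above, the number of boxes per feature map is (feature map dimension)**2 * (priors per position); this is plain arithmetic, independent of the network.
fmap_dims_check = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5, 'conv10_2': 3, 'conv11_2': 1}
n_boxes_check = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 4, 'conv11_2': 4}
per_map = {k: fmap_dims_check[k] ** 2 * n_boxes_check[k] for k in fmap_dims_check}
print(per_map)                  # {'conv4_3': 5776, 'conv7': 2166, 'conv8_2': 600, 'conv9_2': 150, 'conv10_2': 36, 'conv11_2': 4}
print(sum(per_map.values()))    # 8732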
init - Defines all network layers and creates the prior boxes
create_prior_boxes - Creates the 8732 prior boxes across the 6 feature maps
forward - Sends the input data through the three network components and returns the predicted locations and classification scores.
detect_objects - After a forward pass, the predictions can be sent to this function during testing to perform NMS and produce the final detections.
class SSD300(nn.Module):
"""
The SSD300 network - encapsulates the base network, auxiliary, and prediction convolutions.
"""
def __init__(self, n_classes, base_type):
super(SSD300, self).__init__()
self.n_classes = n_classes
if base_type == 'VGG':
self.base = VGGBase()
elif base_type == 'ResNet':
self.base = ResNetBase()
else:
raise NotImplementedError
self.aux_convs = AuxiliaryConvolutions()
self.pred_convs = PredictionConvolutions(n_classes)
# Since lower level features (conv4_3_feats) have considerably larger scales, we take the L2 norm and rescale
# Rescale factor is initially set at 20, but is learned for each channel during back-prop
self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) # there are 512 channels in conv4_3_feats
nn.init.constant_(self.rescale_factors, 20)
# Prior boxes
self.priors_cxcy = self.create_prior_boxes()
def forward(self, image):
"""
Forward propagation.
:param image: images, a tensor of dimensions (N, 3, 300, 300)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
# Run base network convolutions (lower level feature map generators)
conv4_3_feats, conv7_feats = self.base(image) # (N, 512, 38, 38), (N, 1024, 19, 19)
# Rescale conv4_3 after L2 norm
norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt() # (N, 1, 38, 38)
conv4_3_feats = conv4_3_feats / norm # (N, 512, 38, 38)
conv4_3_feats = conv4_3_feats * self.rescale_factors # (N, 512, 38, 38)
# (PyTorch autobroadcasts singleton dimensions during arithmetic)
# Run auxiliary convolutions (higher level feature map generators)
conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = \
self.aux_convs(conv7_feats) # (N, 512, 10, 10), (N, 256, 5, 5), (N, 256, 3, 3), (N, 256, 1, 1)
# Run prediction convolutions (predict offsets w.r.t prior-boxes and classes in each resulting localization box)
locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats,
conv11_2_feats) # (N, 8732, 4), (N, 8732, n_classes)
return locs, classes_scores
def create_prior_boxes(self):
"""
Create the 8732 prior (default) boxes for the SSD300, as defined in the paper.
:return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4)
"""
fmap_dims = {'conv4_3': 38,
'conv7': 19,
'conv8_2': 10,
'conv9_2': 5,
'conv10_2': 3,
'conv11_2': 1}
obj_scales = {'conv4_3': 0.1,
'conv7': 0.2,
'conv8_2': 0.375,
'conv9_2': 0.55,
'conv10_2': 0.725,
'conv11_2': 0.9}
aspect_ratios = {'conv4_3': [1., 2., 0.5],
'conv7': [1., 2., 3., 0.5, .333],
'conv8_2': [1., 2., 3., 0.5, .333],
'conv9_2': [1., 2., 3., 0.5, .333],
'conv10_2': [1., 2., 0.5],
'conv11_2': [1., 2., 0.5]}
fmaps = list(fmap_dims.keys())
prior_boxes = []
for k, fmap in enumerate(fmaps):
for i in range(fmap_dims[fmap]):
for j in range(fmap_dims[fmap]):
cx = (j + 0.5) / fmap_dims[fmap]
cy = (i + 0.5) / fmap_dims[fmap]
for ratio in aspect_ratios[fmap]:
prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])
# For an aspect ratio of 1, use an additional prior whose scale is the geometric mean of the
# scale of the current feature map and the scale of the next feature map
if ratio == 1.:
try:
additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]])
# For the last feature map, there is no "next" feature map
except IndexError:
additional_scale = 1.
prior_boxes.append([cx, cy, additional_scale, additional_scale])
prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (8732, 4)
prior_boxes.clamp_(0, 1) # (8732, 4)
return prior_boxes
def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
"""
Decipher the 8732 locations and class scores (output of the SSD300) to detect objects.
For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
:param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
:param min_score: minimum threshold for a box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
:param top_k: if there are a lot of resulting detections across all classes, keep only the top 'k'
:return: detections (boxes, labels, and scores), lists of length batch_size
"""
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
predicted_scores = F.softmax(predicted_scores, dim=2) # (N, 8732, n_classes)
# Lists to store final predicted boxes, labels, and scores for all images
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
for i in range(batch_size):
# Decode object coordinates from the form we regressed predicted boxes to
decoded_locs = cxcy_to_xy(
gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy)) # (8732, 4), these are fractional pt. coordinates
# Lists to store boxes and scores for this image
image_boxes = list()
image_labels = list()
image_scores = list()
max_scores, best_label = predicted_scores[i].max(dim=1) # (8732)
# Check for each class
for c in range(1, self.n_classes):
# Keep only predicted boxes and scores where scores for this class are above the minimum score
class_scores = predicted_scores[i][:, c] # (8732)
score_above_min_score = class_scores > min_score # torch.uint8 (byte) tensor, for indexing
n_above_min_score = score_above_min_score.sum().item()
if n_above_min_score == 0:
continue
class_scores = class_scores[score_above_min_score] # (n_qualified), n_min_score <= 8732
class_decoded_locs = decoded_locs[score_above_min_score] # (n_qualified, 4)
# Sort predicted boxes and scores by scores
class_scores, sort_ind = class_scores.sort(dim=0, descending=True) # (n_qualified), (n_min_score)
class_decoded_locs = class_decoded_locs[sort_ind] # (n_min_score, 4)
# Find the overlap between predicted boxes
overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) # (n_qualified, n_min_score)
# Non-Maximum Suppression (NMS)
# A torch.uint8 (byte) tensor to keep track of which predicted boxes to suppress
# 1 implies suppress, 0 implies don't suppress
suppress = torch.zeros((n_above_min_score), dtype=torch.uint8).to(device) # (n_qualified)
# Consider each box in order of decreasing scores
for box in range(class_decoded_locs.size(0)):
# If this box is already marked for suppression
if suppress[box] == 1:
continue
# Suppress boxes whose overlaps (with this box) are greater than maximum overlap
# Find such boxes and update suppress indices
suppress = torch.max(suppress, overlap[box] > max_overlap)
# The max operation retains previously suppressed boxes, like an 'OR' operation
# Don't suppress this box, even though it has an overlap of 1 with itself
suppress[box] = 0
# Store only unsuppressed boxes for this class
image_boxes.append(class_decoded_locs[1 - suppress])
image_labels.append(torch.LongTensor((1 - suppress).sum().item() * [c]).to(device))
image_scores.append(class_scores[1 - suppress])
# If no object in any class is found, store a placeholder for 'background'
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
image_labels.append(torch.LongTensor([0]).to(device))
image_scores.append(torch.FloatTensor([0.]).to(device))
# Concatenate into single tensors
image_boxes = torch.cat(image_boxes, dim=0) # (n_objects, 4)
image_labels = torch.cat(image_labels, dim=0) # (n_objects)
image_scores = torch.cat(image_scores, dim=0) # (n_objects)
n_objects = image_scores.size(0)
# Keep only the top k objects
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
image_scores = image_scores[:top_k] # (top_k)
image_boxes = image_boxes[sort_ind][:top_k] # (top_k, 4)
image_labels = image_labels[sort_ind][:top_k] # (top_k)
# Append to lists that store predicted boxes and scores for all images
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores # lists of length batch_size
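A minimal end-to-end sketch (purely illustrative: the input is random and the NMS thresholds min_score, max_overlap, and top_k are example values, not settings from this assignment): forward a dummy batch and decode detections. label_map comes from utils, as used later for n_classes.
with torch.no_grad():
    demo_model = SSD300(n_classes=len(label_map), base_type='VGG').to(device)
    demo_locs, demo_scores = demo_model(torch.randn(2, 3, 300, 300).to(device))  # (2, 8732, 4), (2, 8732, n_classes)
    det_boxes, det_labels, det_scores = demo_model.detect_objects(demo_locs, demo_scores,
                                                                  min_score=0.2, max_overlap=0.45, top_k=200)
print(demo_locs.shape, demo_scores.shape, len(det_boxes))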
During training, the output from the SSD forward pass is sent to the criterion (an instance of this loss module) to calculate the loss.
class MultiBoxLoss(nn.Module):
"""
The MultiBox loss, a loss function for object detection.
This is a combination of:
(1) a localization loss for the predicted locations of the boxes, and
(2) a confidence loss for the predicted class scores.
"""
def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
super(MultiBoxLoss, self).__init__()
self.priors_cxcy = priors_cxcy
self.priors_xy = cxcy_to_xy(priors_cxcy)
self.threshold = threshold
self.neg_pos_ratio = neg_pos_ratio
self.alpha = alpha
self.smooth_l1 = nn.L1Loss()
self.cross_entropy = nn.CrossEntropyLoss(reduce=False)
def forward(self, predicted_locs, predicted_scores, boxes, labels):
"""
Forward propagation.
:param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
:param boxes: true object bounding boxes in boundary coordinates, a list of N tensors
:param labels: true object labels, a list of N tensors
:return: multibox loss, a scalar
"""
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
n_classes = predicted_scores.size(2)
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device) # (N, 8732, 4)
true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(device) # (N, 8732)
# For each image
for i in range(batch_size):
n_objects = boxes[i].size(0)
overlap = find_jaccard_overlap(boxes[i],
self.priors_xy) # (n_objects, 8732)
# For each prior, find the object that has the maximum overlap
overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) # (8732)
# We don't want a situation where an object is not represented in our positive (non-background) priors -
# 1. An object might not be the best object for all priors, and is therefore not in object_for_each_prior.
# 2. All priors with the object may be assigned as background based on the threshold (0.5).
# To remedy this -
# First, find the prior that has the maximum overlap for each object.
_, prior_for_each_object = overlap.max(dim=1) # (N_o)
# Then, assign each object to the corresponding maximum-overlap-prior. (This fixes 1.)
object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(device)
# To ensure these priors qualify, artificially give them an overlap of greater than 0.5. (This fixes 2.)
overlap_for_each_prior[prior_for_each_object] = 1.
# Labels for each prior
label_for_each_prior = labels[i][object_for_each_prior] # (8732)
# Set priors whose overlaps with objects are less than the threshold to be background (no object)
label_for_each_prior[overlap_for_each_prior < self.threshold] = 0 # (8732)
# Store
true_classes[i] = label_for_each_prior
# Encode center-size object coordinates into the form we regressed predicted boxes to
true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy) # (8732, 4)
# Identify priors that are positive (object/non-background)
positive_priors = true_classes != 0 # (N, 8732)
# LOCALIZATION LOSS
# Localization loss is computed only over positive (non-background) priors
loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) # (), scalar
# Note: indexing with a torch.uint8 (byte) tensor flattens the tensor when indexing is across multiple dimensions (N & 8732)
# So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)
# CONFIDENCE LOSS
# Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image
# That is, FOR EACH IMAGE,
# we will take the hardest (neg_pos_ratio * n_positives) negative priors, i.e where there is maximum loss
# This is called Hard Negative Mining - it concentrates on hardest negatives in each image, and also minimizes pos/neg imbalance
# Number of positive and hard-negative priors per image
n_positives = positive_priors.sum(dim=1) # (N)
n_hard_negatives = self.neg_pos_ratio * n_positives # (N)
# First, find the loss for all priors
conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1)) # (N * 8732)
conf_loss_all = conf_loss_all.view(batch_size, n_priors) # (N, 8732)
# We already know which priors are positive
conf_loss_pos = conf_loss_all[positive_priors] # (sum(n_positives))
# Next, find which priors are hard-negative
# To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
conf_loss_neg = conf_loss_all.clone() # (N, 8732)
conf_loss_neg[positive_priors] = 0. # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True) # (N, 8732), sorted by decreasing hardness
hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(device) # (N, 8732)
hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) # (N, 8732)
conf_loss_hard_neg = conf_loss_neg[hard_negatives] # (sum(n_hard_negatives))
# As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float() # (), scalar
# TOTAL LOSS
return conf_loss + self.alpha * loc_loss
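A sketch of how the criterion is applied (the boxes and labels below are dummies for illustration; during training the real values come from the DataLoader, and demo_model is the SSD300 built in the earlier sketch).
demo_criterion = MultiBoxLoss(priors_cxcy=demo_model.priors_cxcy).to(device)
demo_images = torch.randn(2, 3, 300, 300).to(device)
demo_boxes = [torch.FloatTensor([[0.1, 0.1, 0.5, 0.5]]).to(device) for _ in range(2)]  # fractional boundary coordinates
demo_labels = [torch.LongTensor([1]).to(device) for _ in range(2)]                     # any non-background class index
demo_locs, demo_scores = demo_model(demo_images)
print(demo_criterion(demo_locs, demo_scores, demo_boxes, demo_labels).item())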
With the model implemented, it is time to train. This should take about 2 hours and 9 minutes for 10 epochs, or about 1 hour and 5 minutes for only the VOC2007 dataset with 16 epochs.
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
# from model import SSD300, MultiBoxLoss
# from datasets import PascalVOCDataset
from utils import *
# TODO: Done: Import a learning rate scheduler
from torch.optim.lr_scheduler import MultiStepLR
# Data parameters
data_folder = './' # folder with data files
keep_difficult = True # use objects considered difficult to detect?
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Learning parameters
checkpoint = None # path to model checkpoint, None if none
batch_size = 6 # batch size
iterations = 15000 # 120000 # number of iterations to train (DON'T CHANGE)
workers = 4 # number of workers for loading data in the DataLoader
print_freq = 200 # print training status every __ batches
momentum = 0.9 # momentum
weight_decay = 5e-4 # weight decay
grad_clip = None # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32) - you will recognize it by a sorting error in the MultiBox loss calculation
cudnn.benchmark = True
# Overwrite the checkpoint function in utils
def save_checkpoint(epoch, model, optimizer, base_type, scheduler):
"""
Save model checkpoint.
:param epoch: epoch number
:param model: model
:param optimizer: optimizer
:param base_type: The base network type
:param scheduler: learning rate scheduler to save (None when the original schedule is used)
"""
state = {'epoch': epoch,
'model': model,
'optimizer': optimizer,
'scheduler': scheduler}
if scheduler is None:
filename = 'checkpoint_ssd300_' + base_type + '.pth.tar'
else:
filename = 'checkpoint_ssd300_' + base_type + '_scheduler.pth.tar'
torch.save(state, filename)
def train_SSD(base_type, lr_type):
"""
Training.
"""
global start_epoch, label_map, epoch, checkpoint, decay_lr_at
# Custom dataloaders
train_dataset = PascalVOCDataset(data_folder,
split='train',
keep_difficult=keep_difficult)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
if lr_type == 'original_scheduler':
lr = 1e-3 # learning rate
decay_lr_at = [10000, 12500] # [80000, 100000] # decay learning rate after these many iterations
decay_lr_to = 0.1 # decay learning rate to this fraction of the existing learning rate
elif lr_type == 'pytorch_scheduler':
lr = 1e-3 # learning rate
else:
raise NotImplementedError
# Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
# To convert iterations to epochs, divide iterations by the number of iterations per epoch
# The original paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
epochs = iterations // (len(train_dataset) // batch_size)
print("Number of iterations", iterations)
print("Dataset length", len(train_dataset))
print("batch size", batch_size)
print("Number of Epochs to train:", epochs)
if lr_type == 'original_scheduler':
decay_lr_at = [it // (len(train_dataset) // batch_size) for it in decay_lr_at]
print("Epochs to decay learning rate:", decay_lr_at)
# TODO: Done: Add custom scheduler variable here (to be accessed by other code in function scope)
custom_scheduler = None
# Initialize model or load checkpoint
if checkpoint is None:
start_epoch = 0
model = SSD300(n_classes=n_classes, base_type=base_type)
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
if lr_type == 'pytorch_scheduler':
# TODO: Done: Create new scheduler
# Set variables for learning rate decay
decay_lr_at = [10000, 12500]
decay_lr_at = [it // (len(train_dataset) // batch_size) for it in decay_lr_at]
decay_lr_to = 0.1
# Implements original learning rate schedule using PyTorch Scheduler Library
custom_scheduler = MultiStepLR(optimizer, milestones=decay_lr_at, gamma=decay_lr_to, verbose=True)
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
optimizer = checkpoint['optimizer']
if lr_type == 'pytorch_scheduler':
# TODO: Done: Load scheduler
custom_scheduler = checkpoint['scheduler']
# Move to default device
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
# Epochs
for epoch in range(start_epoch, epochs):
# Decay learning rate at particular epochs
if lr_type == 'original_scheduler':
if epoch in decay_lr_at:
adjust_learning_rate(optimizer, decay_lr_to)
# One epoch's training
start_epoch_time = time.time()
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
end_epoch_time = time.time()
print("One epoch time elapsed:", end_epoch_time - start_epoch_time)
# TODO: Done: Update the learning rate
if lr_type == 'pytorch_scheduler':
custom_scheduler.step()
# Save checkpoint
if lr_type == 'original_scheduler':
save_checkpoint(epoch, model, optimizer, base_type, scheduler=None)
else:
# TODO: Done: Call save_checkpoint with your scheduler
save_checkpoint(epoch, model, optimizer, base_type, custom_scheduler)
def train(train_loader, model, criterion, optimizer, epoch):
"""
One epoch's training.
:param train_loader: DataLoader for training data
:param model: model
:param criterion: MultiBox loss
:param optimizer: optimizer
:param epoch: epoch number
"""
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
# Batches
for i, (images, boxes, labels, _) in enumerate(train_loader):
data_time.update(time.time() - start)
# Move to default device
images = images.to(device) # (batch_size (N), 3, 300, 300)
boxes = [b.to(device) for b in boxes]
labels = [l.to(device) for l in labels]
# Forward prop.
predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)
# Loss
loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses))
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
This can be run without making any changes to the code.
start_time = time.time()
train_SSD(base_type='VGG', lr_type='original_scheduler')
end_time = time.time()
print("time elapsed:", end_time - start_time)
Number of iterations 15000
Dataset length 5011
batch size 6
Number of Epochs to train: 17
Epochs to decay learning rate: [11, 14]
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.cache/torch/hub/checkpoints/vgg16-397923af.pth
Loaded base model.
/usr/local/lib/python3.7/dist-packages/torch/nn/_reduction.py:44: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. warnings.warn(warning.format(ret))
Epoch: [0][0/836] Batch Time 5.189 (5.189) Data Time 1.924 (1.924) Loss 24.0142 (24.0142) Epoch: [0][200/836] Batch Time 0.262 (0.524) Data Time 0.000 (0.261) Loss 6.6101 (10.6765) Epoch: [0][400/836] Batch Time 0.259 (0.473) Data Time 0.000 (0.213) Loss 5.4872 (8.6393) Epoch: [0][600/836] Batch Time 0.265 (0.452) Data Time 0.000 (0.194) Loss 6.2219 (7.8495) Epoch: [0][800/836] Batch Time 0.267 (0.442) Data Time 0.000 (0.185) Loss 6.5189 (7.3908) One epoch time elapsed: 369.6647717952728 Epoch: [1][0/836] Batch Time 1.374 (1.374) Data Time 1.026 (1.026) Loss 6.0477 (6.0477) Epoch: [1][200/836] Batch Time 0.254 (0.277) Data Time 0.000 (0.006) Loss 5.4098 (5.9190) Epoch: [1][400/836] Batch Time 0.269 (0.274) Data Time 0.000 (0.003) Loss 5.8402 (5.8327) Epoch: [1][600/836] Batch Time 0.313 (0.272) Data Time 0.000 (0.002) Loss 5.4972 (5.7653) Epoch: [1][800/836] Batch Time 0.260 (0.272) Data Time 0.000 (0.002) Loss 6.0983 (5.7014) One epoch time elapsed: 226.58396458625793 Epoch: [2][0/836] Batch Time 1.507 (1.507) Data Time 1.151 (1.151) Loss 5.0236 (5.0236) Epoch: [2][200/836] Batch Time 0.257 (0.278) Data Time 0.000 (0.006) Loss 5.5074 (5.3674) Epoch: [2][400/836] Batch Time 0.272 (0.273) Data Time 0.000 (0.003) Loss 5.4188 (5.3171) Epoch: [2][600/836] Batch Time 0.256 (0.272) Data Time 0.000 (0.003) Loss 5.4381 (5.2673) Epoch: [2][800/836] Batch Time 0.269 (0.272) Data Time 0.000 (0.002) Loss 4.9033 (5.2219) One epoch time elapsed: 227.13922595977783 Epoch: [3][0/836] Batch Time 1.304 (1.304) Data Time 0.935 (0.935) Loss 4.6588 (4.6588) Epoch: [3][200/836] Batch Time 0.263 (0.280) Data Time 0.000 (0.007) Loss 5.1552 (4.9833) Epoch: [3][400/836] Batch Time 0.299 (0.275) Data Time 0.010 (0.004) Loss 5.1597 (4.9504) Epoch: [3][600/836] Batch Time 0.256 (0.273) Data Time 0.000 (0.003) Loss 5.5343 (4.8814) Epoch: [3][800/836] Batch Time 0.259 (0.272) Data Time 0.000 (0.002) Loss 5.0805 (4.8369) One epoch time elapsed: 227.52859377861023 Epoch: [4][0/836] Batch Time 1.471 (1.471) Data Time 1.135 (1.135) Loss 5.6060 (5.6060) Epoch: [4][200/836] Batch Time 0.255 (0.277) Data Time 0.000 (0.006) Loss 4.7506 (4.7231) Epoch: [4][400/836] Batch Time 0.286 (0.272) Data Time 0.000 (0.003) Loss 4.4194 (4.6814) Epoch: [4][600/836] Batch Time 0.257 (0.273) Data Time 0.000 (0.002) Loss 4.3648 (4.6391) Epoch: [4][800/836] Batch Time 0.274 (0.272) Data Time 0.000 (0.002) Loss 4.5902 (4.5944) One epoch time elapsed: 227.13939213752747 Epoch: [5][0/836] Batch Time 1.598 (1.598) Data Time 1.232 (1.232) Loss 3.9429 (3.9429) Epoch: [5][200/836] Batch Time 0.272 (0.281) Data Time 0.000 (0.007) Loss 5.5429 (4.4431) Epoch: [5][400/836] Batch Time 0.279 (0.276) Data Time 0.000 (0.004) Loss 4.4916 (4.4459) Epoch: [5][600/836] Batch Time 0.278 (0.274) Data Time 0.000 (0.003) Loss 4.7119 (4.4153) Epoch: [5][800/836] Batch Time 0.285 (0.272) Data Time 0.000 (0.002) Loss 4.7502 (4.3842) One epoch time elapsed: 227.32315230369568 Epoch: [6][0/836] Batch Time 1.642 (1.642) Data Time 1.272 (1.272) Loss 4.6838 (4.6838) Epoch: [6][200/836] Batch Time 0.280 (0.279) Data Time 0.000 (0.007) Loss 3.9830 (4.1453) Epoch: [6][400/836] Batch Time 0.271 (0.275) Data Time 0.000 (0.004) Loss 3.9052 (4.1139) Epoch: [6][600/836] Batch Time 0.291 (0.273) Data Time 0.000 (0.003) Loss 4.1893 (4.1435) Epoch: [6][800/836] Batch Time 0.277 (0.273) Data Time 0.000 (0.002) Loss 4.7687 (4.1645) One epoch time elapsed: 227.55938911437988 Epoch: [7][0/836] Batch Time 1.402 (1.402) Data Time 1.053 (1.053) Loss 3.9049 (3.9049) Epoch: [7][200/836] Batch 
Time 0.270 (0.279) Data Time 0.000 (0.007) Loss 4.1589 (4.1216) Epoch: [7][400/836] Batch Time 0.270 (0.276) Data Time 0.000 (0.004) Loss 3.1853 (4.0759) Epoch: [7][600/836] Batch Time 0.264 (0.275) Data Time 0.000 (0.003) Loss 4.1884 (4.0783) Epoch: [7][800/836] Batch Time 0.255 (0.274) Data Time 0.000 (0.002) Loss 4.8071 (4.0603) One epoch time elapsed: 228.5086669921875 Epoch: [8][0/836] Batch Time 1.485 (1.485) Data Time 1.147 (1.147) Loss 2.8619 (2.8619) Epoch: [8][200/836] Batch Time 0.261 (0.279) Data Time 0.000 (0.006) Loss 4.4373 (3.9747) Epoch: [8][400/836] Batch Time 0.276 (0.274) Data Time 0.000 (0.004) Loss 3.0996 (3.9126) Epoch: [8][600/836] Batch Time 0.267 (0.272) Data Time 0.000 (0.003) Loss 3.7524 (3.9288) Epoch: [8][800/836] Batch Time 0.276 (0.272) Data Time 0.000 (0.002) Loss 4.1807 (3.9500) One epoch time elapsed: 227.05095195770264 Epoch: [9][0/836] Batch Time 1.787 (1.787) Data Time 1.420 (1.420) Loss 3.7331 (3.7331) Epoch: [9][200/836] Batch Time 0.272 (0.278) Data Time 0.000 (0.008) Loss 4.5970 (3.9230) Epoch: [9][400/836] Batch Time 0.286 (0.274) Data Time 0.000 (0.004) Loss 3.7003 (3.8650) Epoch: [9][600/836] Batch Time 0.265 (0.273) Data Time 0.000 (0.003) Loss 2.7572 (3.8640) Epoch: [9][800/836] Batch Time 0.280 (0.272) Data Time 0.009 (0.002) Loss 4.1761 (3.8512) One epoch time elapsed: 226.87092757225037 Epoch: [10][0/836] Batch Time 1.159 (1.159) Data Time 0.830 (0.830) Loss 4.1218 (4.1218) Epoch: [10][200/836] Batch Time 0.257 (0.275) Data Time 0.000 (0.005) Loss 3.7975 (3.8651) Epoch: [10][400/836] Batch Time 0.273 (0.273) Data Time 0.000 (0.003) Loss 2.9871 (3.8468) Epoch: [10][600/836] Batch Time 0.276 (0.272) Data Time 0.000 (0.002) Loss 3.8935 (3.8150) Epoch: [10][800/836] Batch Time 0.278 (0.272) Data Time 0.000 (0.002) Loss 3.1751 (3.7774) One epoch time elapsed: 226.92225098609924 DECAYING learning rate. The new LR is 0.000100 Epoch: [11][0/836] Batch Time 1.479 (1.479) Data Time 1.141 (1.141) Loss 3.9145 (3.9145) Epoch: [11][200/836] Batch Time 0.265 (0.277) Data Time 0.000 (0.006) Loss 2.6867 (3.5482) Epoch: [11][400/836] Batch Time 0.268 (0.273) Data Time 0.000 (0.003) Loss 2.6507 (3.4699) Epoch: [11][600/836] Batch Time 0.256 (0.272) Data Time 0.000 (0.002) Loss 2.9986 (3.4504) Epoch: [11][800/836] Batch Time 0.271 (0.271) Data Time 0.000 (0.002) Loss 3.4559 (3.4348) One epoch time elapsed: 226.5672950744629 Epoch: [12][0/836] Batch Time 1.420 (1.420) Data Time 1.045 (1.045) Loss 3.6204 (3.6204) Epoch: [12][200/836] Batch Time 0.279 (0.278) Data Time 0.000 (0.006) Loss 3.4755 (3.3154) Epoch: [12][400/836] Batch Time 0.262 (0.273) Data Time 0.000 (0.003) Loss 3.0674 (3.3130) Epoch: [12][600/836] Batch Time 0.259 (0.272) Data Time 0.000 (0.002) Loss 3.3558 (3.3218) Epoch: [12][800/836] Batch Time 0.281 (0.271) Data Time 0.002 (0.002) Loss 3.2215 (3.3105) One epoch time elapsed: 226.6799681186676 Epoch: [13][0/836] Batch Time 1.579 (1.579) Data Time 1.243 (1.243) Loss 3.2995 (3.2995) Epoch: [13][200/836] Batch Time 0.286 (0.276) Data Time 0.000 (0.007) Loss 3.6529 (3.2514) Epoch: [13][400/836] Batch Time 0.264 (0.273) Data Time 0.000 (0.004) Loss 3.0491 (3.2668) Epoch: [13][600/836] Batch Time 0.280 (0.271) Data Time 0.000 (0.003) Loss 3.7705 (3.2790) Epoch: [13][800/836] Batch Time 0.261 (0.271) Data Time 0.000 (0.002) Loss 3.6587 (3.2879) One epoch time elapsed: 226.04720759391785 DECAYING learning rate. 
The new LR is 0.000010 Epoch: [14][0/836] Batch Time 1.801 (1.801) Data Time 1.467 (1.467) Loss 2.7694 (2.7694) Epoch: [14][200/836] Batch Time 0.266 (0.277) Data Time 0.000 (0.008) Loss 3.3710 (3.2349) Epoch: [14][400/836] Batch Time 0.257 (0.273) Data Time 0.000 (0.004) Loss 3.3091 (3.2628) Epoch: [14][600/836] Batch Time 0.295 (0.272) Data Time 0.000 (0.003) Loss 3.4143 (3.2442) Epoch: [14][800/836] Batch Time 0.275 (0.272) Data Time 0.000 (0.002) Loss 3.4412 (3.2533) One epoch time elapsed: 227.1422462463379 Epoch: [15][0/836] Batch Time 1.364 (1.364) Data Time 1.037 (1.037) Loss 2.3587 (2.3587) Epoch: [15][200/836] Batch Time 0.259 (0.275) Data Time 0.000 (0.006) Loss 3.1170 (3.2527) Epoch: [15][400/836] Batch Time 0.267 (0.272) Data Time 0.000 (0.003) Loss 2.1223 (3.2673) Epoch: [15][600/836] Batch Time 0.256 (0.272) Data Time 0.000 (0.002) Loss 3.0784 (3.2574) Epoch: [15][800/836] Batch Time 0.253 (0.271) Data Time 0.000 (0.002) Loss 3.3807 (3.2631) One epoch time elapsed: 226.17342925071716 Epoch: [16][0/836] Batch Time 1.009 (1.009) Data Time 0.725 (0.725) Loss 3.4928 (3.4928) Epoch: [16][200/836] Batch Time 0.265 (0.275) Data Time 0.000 (0.006) Loss 3.4165 (3.2120) Epoch: [16][400/836] Batch Time 0.276 (0.273) Data Time 0.000 (0.003) Loss 2.6315 (3.2116) Epoch: [16][600/836] Batch Time 0.290 (0.272) Data Time 0.000 (0.002) Loss 2.9339 (3.2131) Epoch: [16][800/836] Batch Time 0.291 (0.271) Data Time 0.009 (0.002) Loss 3.2444 (3.2212) One epoch time elapsed: 226.229727268219 time elapsed: 4029.5247569084167
This should be run after implementing the ResNet Base.
start_time = time.time()
train_SSD(base_type='ResNet', lr_type='original_scheduler')
end_time = time.time()
print("time elapsed:", end_time - start_time)
Number of iterations 15000
Dataset length 5011
batch size 6
Number of Epochs to train: 17
Epochs to decay learning rate: [11, 14]
/usr/local/lib/python3.7/dist-packages/torch/nn/_reduction.py:44: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. warnings.warn(warning.format(ret))
Epoch: [0][0/836] Batch Time 1.081 (1.081) Data Time 0.846 (0.846) Loss 23.9430 (23.9430) Epoch: [0][200/836] Batch Time 0.172 (0.228) Data Time 0.000 (0.062) Loss 6.4575 (10.7724) Epoch: [0][400/836] Batch Time 0.181 (0.222) Data Time 0.000 (0.055) Loss 7.1615 (8.5670) Epoch: [0][600/836] Batch Time 0.160 (0.221) Data Time 0.000 (0.055) Loss 5.6801 (7.7537) Epoch: [0][800/836] Batch Time 0.163 (0.221) Data Time 0.000 (0.053) Loss 5.9458 (7.3129) One epoch time elapsed: 184.69244575500488 Epoch: [1][0/836] Batch Time 1.299 (1.299) Data Time 1.052 (1.052) Loss 5.2727 (5.2727) Epoch: [1][200/836] Batch Time 0.171 (0.230) Data Time 0.000 (0.058) Loss 5.7796 (5.8936) Epoch: [1][400/836] Batch Time 0.171 (0.225) Data Time 0.000 (0.055) Loss 6.3773 (5.8333) Epoch: [1][600/836] Batch Time 0.167 (0.226) Data Time 0.000 (0.055) Loss 5.7716 (5.7862) Epoch: [1][800/836] Batch Time 0.212 (0.226) Data Time 0.004 (0.055) Loss 5.2263 (5.7428) One epoch time elapsed: 188.72328352928162 Epoch: [2][0/836] Batch Time 1.271 (1.271) Data Time 1.013 (1.013) Loss 6.1192 (6.1192) Epoch: [2][200/836] Batch Time 0.179 (0.229) Data Time 0.000 (0.060) Loss 5.6989 (5.4897) Epoch: [2][400/836] Batch Time 0.191 (0.224) Data Time 0.008 (0.055) Loss 5.7383 (5.4212) Epoch: [2][600/836] Batch Time 0.413 (0.224) Data Time 0.248 (0.054) Loss 5.7015 (5.3926) Epoch: [2][800/836] Batch Time 0.168 (0.223) Data Time 0.000 (0.054) Loss 5.9510 (5.3732) One epoch time elapsed: 186.33891081809998 Epoch: [3][0/836] Batch Time 1.564 (1.564) Data Time 1.331 (1.331) Loss 5.7964 (5.7964) Epoch: [3][200/836] Batch Time 0.190 (0.230) Data Time 0.000 (0.060) Loss 4.9975 (5.1679) Epoch: [3][400/836] Batch Time 0.168 (0.228) Data Time 0.000 (0.058) Loss 5.6551 (5.1325) Epoch: [3][600/836] Batch Time 0.209 (0.223) Data Time 0.001 (0.053) Loss 4.8158 (5.0998) Epoch: [3][800/836] Batch Time 0.135 (0.225) Data Time 0.000 (0.054) Loss 4.5964 (5.0795) One epoch time elapsed: 187.25636506080627 Epoch: [4][0/836] Batch Time 1.714 (1.714) Data Time 1.496 (1.496) Loss 4.9904 (4.9904) Epoch: [4][200/836] Batch Time 0.168 (0.225) Data Time 0.000 (0.056) Loss 5.4823 (4.8894) Epoch: [4][400/836] Batch Time 0.197 (0.226) Data Time 0.000 (0.058) Loss 4.2882 (4.8831) Epoch: [4][600/836] Batch Time 0.139 (0.223) Data Time 0.000 (0.055) Loss 5.1647 (4.8497) Epoch: [4][800/836] Batch Time 0.168 (0.223) Data Time 0.000 (0.055) Loss 4.7277 (4.8287) One epoch time elapsed: 185.90052390098572 Epoch: [5][0/836] Batch Time 1.123 (1.123) Data Time 0.909 (0.909) Loss 4.7079 (4.7079) Epoch: [5][200/836] Batch Time 0.171 (0.228) Data Time 0.007 (0.058) Loss 3.7862 (4.7905) Epoch: [5][400/836] Batch Time 0.154 (0.224) Data Time 0.000 (0.055) Loss 4.5454 (4.7523) Epoch: [5][600/836] Batch Time 0.160 (0.224) Data Time 0.000 (0.055) Loss 4.1026 (4.7073) Epoch: [5][800/836] Batch Time 0.187 (0.223) Data Time 0.004 (0.053) Loss 4.9267 (4.6865) One epoch time elapsed: 185.70243000984192 Epoch: [6][0/836] Batch Time 0.846 (0.846) Data Time 0.674 (0.674) Loss 4.2003 (4.2003) Epoch: [6][200/836] Batch Time 0.150 (0.229) Data Time 0.000 (0.062) Loss 4.6449 (4.5522) Epoch: [6][400/836] Batch Time 0.184 (0.224) Data Time 0.000 (0.055) Loss 4.5176 (4.5339) Epoch: [6][600/836] Batch Time 0.187 (0.224) Data Time 0.000 (0.055) Loss 4.0916 (4.5333) Epoch: [6][800/836] Batch Time 0.761 (0.225) Data Time 0.621 (0.056) Loss 3.8949 (4.5077) One epoch time elapsed: 187.44984698295593 Epoch: [7][0/836] Batch Time 1.323 (1.323) Data Time 1.077 (1.077) Loss 4.0710 (4.0710) Epoch: [7][200/836] Batch 
Time 0.177 (0.229) Data Time 0.000 (0.059) Loss 4.6246 (4.4752) Epoch: [7][400/836] Batch Time 0.334 (0.225) Data Time 0.165 (0.057) Loss 4.3495 (4.4646) Epoch: [7][600/836] Batch Time 0.210 (0.224) Data Time 0.005 (0.055) Loss 5.5567 (4.4631) Epoch: [7][800/836] Batch Time 0.255 (0.223) Data Time 0.099 (0.054) Loss 3.6758 (4.4586) One epoch time elapsed: 186.0502529144287 Epoch: [8][0/836] Batch Time 0.986 (0.986) Data Time 0.807 (0.807) Loss 4.4557 (4.4557) Epoch: [8][200/836] Batch Time 0.153 (0.231) Data Time 0.000 (0.061) Loss 4.7459 (4.3084) Epoch: [8][400/836] Batch Time 0.170 (0.225) Data Time 0.000 (0.055) Loss 4.0024 (4.3055) Epoch: [8][600/836] Batch Time 0.329 (0.223) Data Time 0.163 (0.053) Loss 4.9278 (4.3439) Epoch: [8][800/836] Batch Time 0.186 (0.223) Data Time 0.004 (0.053) Loss 5.1929 (4.3391) One epoch time elapsed: 185.50461602210999 Epoch: [9][0/836] Batch Time 0.975 (0.975) Data Time 0.755 (0.755) Loss 4.2428 (4.2428) Epoch: [9][200/836] Batch Time 0.160 (0.223) Data Time 0.000 (0.052) Loss 3.4068 (4.2258) Epoch: [9][400/836] Batch Time 0.168 (0.224) Data Time 0.000 (0.054) Loss 3.9249 (4.2441) Epoch: [9][600/836] Batch Time 0.182 (0.222) Data Time 0.000 (0.052) Loss 4.0695 (4.2268) Epoch: [9][800/836] Batch Time 0.179 (0.221) Data Time 0.000 (0.051) Loss 4.2078 (4.2243) One epoch time elapsed: 184.34885120391846 Epoch: [10][0/836] Batch Time 1.367 (1.367) Data Time 1.124 (1.124) Loss 3.8885 (3.8885) Epoch: [10][200/836] Batch Time 0.158 (0.223) Data Time 0.005 (0.052) Loss 5.6498 (4.1901) Epoch: [10][400/836] Batch Time 0.185 (0.226) Data Time 0.000 (0.056) Loss 3.7192 (4.1910) Epoch: [10][600/836] Batch Time 0.158 (0.225) Data Time 0.000 (0.056) Loss 4.1662 (4.1806) Epoch: [10][800/836] Batch Time 0.337 (0.223) Data Time 0.188 (0.054) Loss 4.6685 (4.1644) One epoch time elapsed: 185.37018275260925 DECAYING learning rate. The new LR is 0.000100 Epoch: [11][0/836] Batch Time 1.874 (1.874) Data Time 1.653 (1.653) Loss 4.1751 (4.1751) Epoch: [11][200/836] Batch Time 0.179 (0.227) Data Time 0.000 (0.058) Loss 3.2719 (3.9861) Epoch: [11][400/836] Batch Time 0.161 (0.225) Data Time 0.000 (0.055) Loss 3.5820 (3.9297) Epoch: [11][600/836] Batch Time 0.183 (0.224) Data Time 0.002 (0.054) Loss 3.8172 (3.9009) Epoch: [11][800/836] Batch Time 0.173 (0.222) Data Time 0.007 (0.053) Loss 4.1530 (3.8730) One epoch time elapsed: 185.28597807884216 Epoch: [12][0/836] Batch Time 1.753 (1.753) Data Time 1.520 (1.520) Loss 4.1679 (4.1679) Epoch: [12][200/836] Batch Time 0.153 (0.225) Data Time 0.002 (0.055) Loss 3.4151 (3.8406) Epoch: [12][400/836] Batch Time 0.149 (0.224) Data Time 0.000 (0.055) Loss 3.6012 (3.8300) Epoch: [12][600/836] Batch Time 0.172 (0.224) Data Time 0.000 (0.055) Loss 3.6394 (3.8146) Epoch: [12][800/836] Batch Time 0.169 (0.222) Data Time 0.000 (0.053) Loss 3.8320 (3.8115) One epoch time elapsed: 185.63955950737 Epoch: [13][0/836] Batch Time 1.259 (1.259) Data Time 1.036 (1.036) Loss 4.3314 (4.3314) Epoch: [13][200/836] Batch Time 0.191 (0.231) Data Time 0.000 (0.062) Loss 3.7420 (3.7744) Epoch: [13][400/836] Batch Time 0.182 (0.225) Data Time 0.004 (0.057) Loss 3.3081 (3.7645) Epoch: [13][600/836] Batch Time 0.150 (0.225) Data Time 0.000 (0.056) Loss 4.4821 (3.7665) Epoch: [13][800/836] Batch Time 0.228 (0.224) Data Time 0.056 (0.056) Loss 3.2643 (3.7484) One epoch time elapsed: 186.78839135169983 DECAYING learning rate. 
The new LR is 0.000010 Epoch: [14][0/836] Batch Time 1.207 (1.207) Data Time 0.932 (0.932) Loss 3.1967 (3.1967) Epoch: [14][200/836] Batch Time 0.165 (0.221) Data Time 0.000 (0.048) Loss 3.5771 (3.7167) Epoch: [14][400/836] Batch Time 0.159 (0.226) Data Time 0.010 (0.055) Loss 3.6856 (3.7403) Epoch: [14][600/836] Batch Time 0.162 (0.224) Data Time 0.000 (0.054) Loss 3.7055 (3.7548) Epoch: [14][800/836] Batch Time 0.391 (0.222) Data Time 0.222 (0.053) Loss 4.3432 (3.7411) One epoch time elapsed: 185.41887760162354 Epoch: [15][0/836] Batch Time 1.612 (1.612) Data Time 1.379 (1.379) Loss 4.1939 (4.1939) Epoch: [15][200/836] Batch Time 0.156 (0.236) Data Time 0.000 (0.067) Loss 5.0950 (3.6825) Epoch: [15][400/836] Batch Time 0.187 (0.230) Data Time 0.000 (0.061) Loss 4.6599 (3.7096) Epoch: [15][600/836] Batch Time 0.190 (0.227) Data Time 0.000 (0.058) Loss 3.2482 (3.7202) Epoch: [15][800/836] Batch Time 0.203 (0.224) Data Time 0.009 (0.055) Loss 2.6449 (3.7321) One epoch time elapsed: 187.13002943992615 Epoch: [16][0/836] Batch Time 1.373 (1.373) Data Time 1.158 (1.158) Loss 4.2898 (4.2898) Epoch: [16][200/836] Batch Time 0.782 (0.233) Data Time 0.635 (0.064) Loss 3.6442 (3.7174) Epoch: [16][400/836] Batch Time 0.159 (0.224) Data Time 0.000 (0.054) Loss 3.6415 (3.7221) Epoch: [16][600/836] Batch Time 0.338 (0.220) Data Time 0.177 (0.050) Loss 2.8522 (3.7186) Epoch: [16][800/836] Batch Time 0.180 (0.221) Data Time 0.000 (0.052) Loss 3.3783 (3.7225) One epoch time elapsed: 184.30127692222595 time elapsed: 3176.6286170482635
This should be run after modifying the training loop to use a learning rate scheduler.
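As a point of reference, one common way to make that modification is with torch.optim.lr_scheduler. The snippet below is only a hedged sketch of the pattern, not the assignment's exact implementation: the milestone epochs [11, 14] are an assumption chosen to mirror the decay points visible in the output further down, the toy model stands in for the SSD optimizer (which, per that output, uses two parameter groups with biases at twice the base learning rate), and the "Adjusting learning rate of group ..." lines suggest the scheduler was constructed with verbose=True.
# Minimal, self-contained sketch of the scheduler pattern (toy model; assumed milestones).
import torch
toy_model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(toy_model.parameters(), lr=1e-3, momentum=0.9)
# Decay the learning rate by 10x after epochs 11 and 14, mirroring the log below.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[11, 14], gamma=0.1)
for epoch in range(17):
    print('epoch %d, lr %g' % (epoch, optimizer.param_groups[0]['lr']))
    # ... one full training epoch (the assignment's train(...) call) would go here ...
    scheduler.step()  # step once per epoch; the LR drops automatically after each milestone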
start_time = time.time()
train_SSD(base_type='VGG', lr_type='pytorch_scheduler')
end_time = time.time()
print("time elapsed:", end_time - start_time)
Number of iterations 15000 Dataset length 5011 batch size 6 Number of Epochs to train: 17 Loaded base model. Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03.
/usr/local/lib/python3.7/dist-packages/torch/nn/_reduction.py:44: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. warnings.warn(warning.format(ret))
Epoch: [0][0/836] Batch Time 1.343 (1.343) Data Time 0.987 (0.987) Loss 23.4372 (23.4372) Epoch: [0][200/836] Batch Time 0.251 (0.280) Data Time 0.000 (0.006) Loss 6.0953 (10.5427) Epoch: [0][400/836] Batch Time 0.276 (0.275) Data Time 0.000 (0.004) Loss 6.5257 (8.7507) Epoch: [0][600/836] Batch Time 0.260 (0.274) Data Time 0.000 (0.003) Loss 6.4529 (7.9559) Epoch: [0][800/836] Batch Time 0.273 (0.272) Data Time 0.000 (0.002) Loss 5.4534 (7.4933) One epoch time elapsed: 227.53785347938538 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [1][0/836] Batch Time 1.463 (1.463) Data Time 1.081 (1.081) Loss 6.8329 (6.8329) Epoch: [1][200/836] Batch Time 0.264 (0.278) Data Time 0.000 (0.007) Loss 5.2419 (6.1249) Epoch: [1][400/836] Batch Time 0.286 (0.274) Data Time 0.000 (0.004) Loss 5.5226 (6.0326) Epoch: [1][600/836] Batch Time 0.286 (0.273) Data Time 0.004 (0.003) Loss 5.6030 (5.9909) Epoch: [1][800/836] Batch Time 0.267 (0.273) Data Time 0.000 (0.002) Loss 5.9204 (5.9358) One epoch time elapsed: 227.70607733726501 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [2][0/836] Batch Time 1.524 (1.524) Data Time 1.144 (1.144) Loss 5.3850 (5.3850) Epoch: [2][200/836] Batch Time 0.274 (0.280) Data Time 0.000 (0.007) Loss 5.1859 (5.7478) Epoch: [2][400/836] Batch Time 0.260 (0.275) Data Time 0.000 (0.004) Loss 5.8273 (5.6504) Epoch: [2][600/836] Batch Time 0.270 (0.274) Data Time 0.000 (0.003) Loss 4.5833 (5.5830) Epoch: [2][800/836] Batch Time 0.288 (0.273) Data Time 0.000 (0.002) Loss 5.0644 (5.5240) One epoch time elapsed: 227.57433557510376 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [3][0/836] Batch Time 1.367 (1.367) Data Time 1.010 (1.010) Loss 4.8178 (4.8178) Epoch: [3][200/836] Batch Time 0.270 (0.276) Data Time 0.000 (0.005) Loss 5.8457 (5.2629) Epoch: [3][400/836] Batch Time 0.258 (0.273) Data Time 0.000 (0.003) Loss 5.6584 (5.2445) Epoch: [3][600/836] Batch Time 0.256 (0.272) Data Time 0.000 (0.002) Loss 4.8845 (5.1802) Epoch: [3][800/836] Batch Time 0.278 (0.272) Data Time 0.000 (0.002) Loss 4.9766 (5.1484) One epoch time elapsed: 226.73477053642273 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [4][0/836] Batch Time 1.947 (1.947) Data Time 1.608 (1.608) Loss 5.4869 (5.4869) Epoch: [4][200/836] Batch Time 0.259 (0.277) Data Time 0.000 (0.008) Loss 4.2823 (4.9472) Epoch: [4][400/836] Batch Time 0.273 (0.273) Data Time 0.000 (0.004) Loss 5.0759 (4.9009) Epoch: [4][600/836] Batch Time 0.272 (0.271) Data Time 0.000 (0.003) Loss 5.2606 (4.8682) Epoch: [4][800/836] Batch Time 0.262 (0.271) Data Time 0.000 (0.002) Loss 4.3791 (4.8549) One epoch time elapsed: 226.51552081108093 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [5][0/836] Batch Time 1.724 (1.724) Data Time 1.376 (1.376) Loss 4.4146 (4.4146) Epoch: [5][200/836] Batch Time 0.295 (0.280) Data Time 0.000 (0.008) Loss 4.7761 (4.6645) Epoch: [5][400/836] Batch Time 0.262 (0.276) Data Time 0.000 (0.004) Loss 4.4776 (4.6635) Epoch: [5][600/836] Batch Time 0.277 (0.273) Data Time 0.005 (0.003) Loss 4.4214 (4.6509) Epoch: [5][800/836] Batch Time 0.268 (0.273) Data Time 0.000 (0.002) Loss 4.9742 (4.6165) One epoch time elapsed: 227.59743905067444 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. 
Epoch: [6][0/836] Batch Time 1.137 (1.137) Data Time 0.793 (0.793) Loss 5.6018 (5.6018) Epoch: [6][200/836] Batch Time 0.282 (0.276) Data Time 0.000 (0.005) Loss 4.4475 (4.4546) Epoch: [6][400/836] Batch Time 0.261 (0.273) Data Time 0.000 (0.003) Loss 5.1814 (4.4143) Epoch: [6][600/836] Batch Time 0.272 (0.273) Data Time 0.000 (0.002) Loss 3.2317 (4.4021) Epoch: [6][800/836] Batch Time 0.258 (0.272) Data Time 0.000 (0.002) Loss 4.3235 (4.4047) One epoch time elapsed: 227.19121026992798 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [7][0/836] Batch Time 1.811 (1.811) Data Time 1.469 (1.469) Loss 4.5537 (4.5537) Epoch: [7][200/836] Batch Time 0.276 (0.280) Data Time 0.000 (0.008) Loss 4.6286 (4.3140) Epoch: [7][400/836] Batch Time 0.265 (0.275) Data Time 0.000 (0.004) Loss 4.2871 (4.2918) Epoch: [7][600/836] Batch Time 0.285 (0.273) Data Time 0.000 (0.003) Loss 4.1656 (4.2724) Epoch: [7][800/836] Batch Time 0.285 (0.272) Data Time 0.005 (0.002) Loss 4.7168 (4.2580) One epoch time elapsed: 227.234929561615 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [8][0/836] Batch Time 1.614 (1.614) Data Time 1.242 (1.242) Loss 4.7168 (4.7168) Epoch: [8][200/836] Batch Time 0.269 (0.278) Data Time 0.000 (0.007) Loss 3.8600 (4.2072) Epoch: [8][400/836] Batch Time 0.265 (0.274) Data Time 0.000 (0.004) Loss 3.1776 (4.1578) Epoch: [8][600/836] Batch Time 0.284 (0.273) Data Time 0.000 (0.003) Loss 3.6530 (4.1380) Epoch: [8][800/836] Batch Time 0.272 (0.272) Data Time 0.000 (0.002) Loss 3.7811 (4.1344) One epoch time elapsed: 227.18890571594238 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [9][0/836] Batch Time 1.321 (1.321) Data Time 0.933 (0.933) Loss 4.2419 (4.2419) Epoch: [9][200/836] Batch Time 0.266 (0.279) Data Time 0.000 (0.007) Loss 3.5906 (4.0251) Epoch: [9][400/836] Batch Time 0.281 (0.274) Data Time 0.000 (0.004) Loss 3.8385 (3.9724) Epoch: [9][600/836] Batch Time 0.263 (0.273) Data Time 0.000 (0.003) Loss 3.3359 (3.9681) Epoch: [9][800/836] Batch Time 0.288 (0.272) Data Time 0.000 (0.002) Loss 4.2231 (3.9727) One epoch time elapsed: 227.25981211662292 Adjusting learning rate of group 0 to 2.0000e-03. Adjusting learning rate of group 1 to 1.0000e-03. Epoch: [10][0/836] Batch Time 1.098 (1.098) Data Time 0.750 (0.750) Loss 3.5101 (3.5101) Epoch: [10][200/836] Batch Time 0.263 (0.279) Data Time 0.000 (0.007) Loss 4.6579 (3.8963) Epoch: [10][400/836] Batch Time 0.290 (0.274) Data Time 0.005 (0.004) Loss 4.6202 (3.8900) Epoch: [10][600/836] Batch Time 0.269 (0.273) Data Time 0.000 (0.003) Loss 3.8366 (3.8700) Epoch: [10][800/836] Batch Time 0.276 (0.272) Data Time 0.000 (0.002) Loss 3.2439 (3.8686) One epoch time elapsed: 227.27550101280212 Adjusting learning rate of group 0 to 2.0000e-04. Adjusting learning rate of group 1 to 1.0000e-04. Epoch: [11][0/836] Batch Time 1.496 (1.496) Data Time 1.143 (1.143) Loss 4.7483 (4.7483) Epoch: [11][200/836] Batch Time 0.270 (0.277) Data Time 0.000 (0.006) Loss 2.7078 (3.6210) Epoch: [11][400/836] Batch Time 0.258 (0.273) Data Time 0.000 (0.004) Loss 3.8858 (3.5682) Epoch: [11][600/836] Batch Time 0.263 (0.272) Data Time 0.000 (0.003) Loss 4.3290 (3.5427) Epoch: [11][800/836] Batch Time 0.266 (0.272) Data Time 0.000 (0.002) Loss 3.4923 (3.5216) One epoch time elapsed: 226.92391991615295 Adjusting learning rate of group 0 to 2.0000e-04. Adjusting learning rate of group 1 to 1.0000e-04. 
Epoch: [12][0/836] Batch Time 1.396 (1.396) Data Time 1.032 (1.032) Loss 4.1357 (4.1357) Epoch: [12][200/836] Batch Time 0.265 (0.278) Data Time 0.000 (0.006) Loss 3.9501 (3.3889) Epoch: [12][400/836] Batch Time 0.262 (0.273) Data Time 0.000 (0.003) Loss 3.2066 (3.3826) Epoch: [12][600/836] Batch Time 0.252 (0.272) Data Time 0.000 (0.002) Loss 4.3645 (3.4281) Epoch: [12][800/836] Batch Time 0.282 (0.272) Data Time 0.000 (0.002) Loss 3.1806 (3.4331) One epoch time elapsed: 226.62711930274963 Adjusting learning rate of group 0 to 2.0000e-04. Adjusting learning rate of group 1 to 1.0000e-04. Epoch: [13][0/836] Batch Time 1.289 (1.289) Data Time 0.955 (0.955) Loss 3.4714 (3.4714) Epoch: [13][200/836] Batch Time 0.269 (0.280) Data Time 0.000 (0.009) Loss 3.3747 (3.4476) Epoch: [13][400/836] Batch Time 0.254 (0.275) Data Time 0.000 (0.005) Loss 3.6058 (3.4315) Epoch: [13][600/836] Batch Time 0.264 (0.273) Data Time 0.000 (0.004) Loss 3.7975 (3.4250) Epoch: [13][800/836] Batch Time 0.263 (0.273) Data Time 0.000 (0.003) Loss 3.6081 (3.4091) One epoch time elapsed: 227.6345660686493 Adjusting learning rate of group 0 to 2.0000e-05. Adjusting learning rate of group 1 to 1.0000e-05. Epoch: [14][0/836] Batch Time 1.313 (1.313) Data Time 0.956 (0.956) Loss 3.3939 (3.3939) Epoch: [14][200/836] Batch Time 0.276 (0.277) Data Time 0.000 (0.007) Loss 3.1305 (3.3456) Epoch: [14][400/836] Batch Time 0.253 (0.274) Data Time 0.000 (0.004) Loss 3.2584 (3.3774) Epoch: [14][600/836] Batch Time 0.292 (0.272) Data Time 0.000 (0.003) Loss 2.9942 (3.3572) Epoch: [14][800/836] Batch Time 0.271 (0.272) Data Time 0.000 (0.002) Loss 1.9271 (3.3570) One epoch time elapsed: 226.9565052986145 Adjusting learning rate of group 0 to 2.0000e-05. Adjusting learning rate of group 1 to 1.0000e-05. Epoch: [15][0/836] Batch Time 2.043 (2.043) Data Time 1.684 (1.684) Loss 3.1222 (3.1222) Epoch: [15][200/836] Batch Time 0.269 (0.281) Data Time 0.000 (0.009) Loss 3.1743 (3.3600) Epoch: [15][400/836] Batch Time 0.267 (0.275) Data Time 0.000 (0.005) Loss 3.2778 (3.3408) Epoch: [15][600/836] Batch Time 0.269 (0.274) Data Time 0.000 (0.003) Loss 2.6946 (3.3531) Epoch: [15][800/836] Batch Time 0.274 (0.273) Data Time 0.000 (0.003) Loss 2.7574 (3.3457) One epoch time elapsed: 227.78893542289734 Adjusting learning rate of group 0 to 2.0000e-05. Adjusting learning rate of group 1 to 1.0000e-05. Epoch: [16][0/836] Batch Time 1.617 (1.617) Data Time 1.248 (1.248) Loss 4.0006 (4.0006) Epoch: [16][200/836] Batch Time 0.267 (0.278) Data Time 0.000 (0.007) Loss 3.7132 (3.3981) Epoch: [16][400/836] Batch Time 0.302 (0.273) Data Time 0.007 (0.004) Loss 3.1165 (3.3600) Epoch: [16][600/836] Batch Time 0.276 (0.272) Data Time 0.000 (0.003) Loss 3.4064 (3.3680) Epoch: [16][800/836] Batch Time 0.270 (0.271) Data Time 0.000 (0.002) Loss 2.9377 (3.3443) One epoch time elapsed: 226.15768575668335 Adjusting learning rate of group 0 to 2.0000e-05. Adjusting learning rate of group 1 to 1.0000e-05. time elapsed: 3894.7326452732086
Now let's run the eval code; expect it to take up to about 30 minutes per model.
from utils import *
# from datasets import PascalVOCDataset
from tqdm import tqdm
from pprint import PrettyPrinter
# Good formatting when printing the APs for each class and mAP
pp = PrettyPrinter()
# Parameters
data_folder = './'
keep_difficult = True # difficult ground truth objects must always be considered in mAP calculation, because these objects DO exist!
batch_size = 64
workers = 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = './checkpoint_ssd300_VGG.pth.tar'
# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)
# Switch to eval mode
model.eval()
# Load test data
test_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
def evaluate(test_loader, model):
    """
    Evaluate.
    :param test_loader: DataLoader for test data
    :param model: model
    """
    # Make sure it's in eval mode
    model.eval()
    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()  # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py
    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels, difficulties) in enumerate(tqdm(test_loader, desc='Evaluating')):
            images = images.to(device)  # (N, 3, 300, 300)
            # Forward prop.
            predicted_locs, predicted_scores = model(images)
            # Detect objects in SSD output
            det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(predicted_locs, predicted_scores,
                                                                                        min_score=0.01, max_overlap=0.45,
                                                                                        top_k=200)
            # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparison with the paper's results and other repos
            # Store this batch's results for mAP calculation
            boxes = [b.to(device) for b in boxes]
            labels = [l.to(device) for l in labels]
            difficulties = [d.to(device) for d in difficulties]
            det_boxes.extend(det_boxes_batch)
            det_labels.extend(det_labels_batch)
            det_scores.extend(det_scores_batch)
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)
        # Calculate mAP
        APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties)
    # Print AP for each class
    pp.pprint(APs)
    print('\nMean Average Precision (mAP): %.3f' % mAP)
Your model should output an mAP close to this value:
Mean Average Precision (mAP): 0.589
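For context on what this number means: calculate_mAP in utils.py computes one average precision per VOC class (a detection counts as a true positive when its IoU with a not-yet-matched ground-truth box of the same class is at least 0.5), and the reported mAP is simply the unweighted mean of those 20 per-class values. The snippet below is only an illustration with made-up numbers; in the notebook the APs dict is local to evaluate(), so you would have to return it to check this directly.
# Illustration only: mAP is the unweighted mean of the per-class APs (values here are made up).
example_APs = {'car': 0.75, 'person': 0.64, 'dog': 0.77}
print('mAP over these classes: %.3f' % (sum(example_APs.values()) / len(example_APs)))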
checkpoint = './checkpoint_ssd300_VGG.pth.tar'
# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)
# Switch to eval mode
model.eval()
# Load test data
test_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
evaluate(test_loader, model)
Evaluating: 0%| | 0/78 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:183: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) /usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:185: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) Evaluating: 100%|██████████| 78/78 [15:47<00:00, 12.15s/it]
{'aeroplane': 0.6738564968109131,
'bicycle': 0.7219585180282593,
'bird': 0.5783435106277466,
'boat': 0.4228781461715698,
'bottle': 0.17184174060821533,
'bus': 0.6805016398429871,
'car': 0.7546239495277405,
'cat': 0.7906004786491394,
'chair': 0.3071005344390869,
'cow': 0.6505981683731079,
'diningtable': 0.4952280819416046,
'dog': 0.7667381167411804,
'horse': 0.7754424810409546,
'motorbike': 0.715682864189148,
'person': 0.6367228627204895,
'pottedplant': 0.2622371315956116,
'sheep': 0.5804213285446167,
'sofa': 0.5441953539848328,
'train': 0.7429677248001099,
'tvmonitor': 0.606717586517334}
Mean Average Precision (mAP): 0.594
checkpoint = './checkpoint_ssd300_ResNet.pth.tar'
# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)
# Switch to eval mode
model.eval()
# Load test data
test_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
evaluate(test_loader, model)
Evaluating: 0%| | 0/78 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:183: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) /usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:185: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) Evaluating: 100%|██████████| 78/78 [28:47<00:00, 22.15s/it]
{'aeroplane': 0.5623462796211243,
'bicycle': 0.6031755208969116,
'bird': 0.5085681080818176,
'boat': 0.24675048887729645,
'bottle': 0.2263406217098236,
'bus': 0.5424309968948364,
'car': 0.7316646575927734,
'cat': 0.6334554553031921,
'chair': 0.3372231423854828,
'cow': 0.4018259048461914,
'diningtable': 0.3483617305755615,
'dog': 0.5849977731704712,
'horse': 0.6911000609397888,
'motorbike': 0.6321118474006653,
'person': 0.6465883851051331,
'pottedplant': 0.222884863615036,
'sheep': 0.3083771765232086,
'sofa': 0.5663437843322754,
'train': 0.5420916676521301,
'tvmonitor': 0.5409076809883118}
Mean Average Precision (mAP): 0.494
checkpoint = './checkpoint_ssd300_VGG_scheduler.pth.tar'
# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)
# Switch to eval mode
model.eval()
# Load test data
test_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)
evaluate(test_loader, model)
Evaluating: 0%| | 0/78 [00:00<?, ?it/s]/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:183: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) /usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:185: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) Evaluating: 100%|██████████| 78/78 [16:55<00:00, 13.02s/it]
{'aeroplane': 0.6447340846061707,
'bicycle': 0.6903569102287292,
'bird': 0.538161039352417,
'boat': 0.27150246500968933,
'bottle': 0.18686126172542572,
'bus': 0.6603901386260986,
'car': 0.7596113085746765,
'cat': 0.7549920678138733,
'chair': 0.27595269680023193,
'cow': 0.5758591890335083,
'diningtable': 0.41432175040245056,
'dog': 0.7536466717720032,
'horse': 0.7312289476394653,
'motorbike': 0.7030423879623413,
'person': 0.6245583891868591,
'pottedplant': 0.20302662253379822,
'sheep': 0.572557270526886,
'sofa': 0.5214038491249084,
'train': 0.6827385425567627,
'tvmonitor': 0.5715056657791138}
Mean Average Precision (mAP): 0.557
And lastly, let's view some images with our detections!
from torchvision import transforms
from utils import *
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load model checkpoint
checkpoint = 'checkpoint_ssd300_VGG_scheduler.pth.tar'
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
model = model.to(device)
model.eval()
# Transforms
resize = transforms.Resize((300, 300))
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def detect(original_image, min_score, max_overlap, top_k, suppress=None):
    """
    Detect objects in an image with a trained SSD300, and visualize the results.
    :param original_image: image, a PIL Image
    :param min_score: minimum threshold for a detected box to be considered a match for a certain class
    :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via Non-Maximum Suppression (NMS)
    :param top_k: if there are a lot of resulting detections across all classes, keep only the top 'k'
    :param suppress: classes that you know for sure cannot be in the image or you do not want in the image, a list
    :return: annotated image, a PIL Image
    """
    # Transform
    image = normalize(to_tensor(resize(original_image)))
    # Move to default device
    image = image.to(device)
    # Forward prop.
    predicted_locs, predicted_scores = model(image.unsqueeze(0))
    # Detect objects in SSD output
    det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score,
                                                             max_overlap=max_overlap, top_k=top_k)
    # Move detections to the CPU
    det_boxes = det_boxes[0].to('cpu')
    # Transform to original image dimensions
    original_dims = torch.FloatTensor(
        [original_image.width, original_image.height, original_image.width, original_image.height]).unsqueeze(0)
    det_boxes = det_boxes * original_dims
    # Decode class integer labels
    det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]
    # If no objects found, the detected labels will be set to ['0.'], i.e. ['background'] in SSD300.detect_objects() in model.py
    if det_labels == ['background']:
        # Just return original image
        return original_image
    # Annotate
    annotated_image = original_image
    draw = ImageDraw.Draw(annotated_image)
    font = ImageFont.load_default()  # ImageFont.truetype("./calibril.ttf", 15)
    # Suppress specific classes, if needed
    for i in range(det_boxes.size(0)):
        if suppress is not None:
            if det_labels[i] in suppress:
                continue
        # Boxes
        box_location = det_boxes[i].tolist()
        draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])
        draw.rectangle(xy=[l + 1. for l in box_location], outline=label_color_map[
            det_labels[i]])  # a second rectangle at an offset of 1 pixel to increase line thickness
        # draw.rectangle(xy=[l + 2. for l in box_location], outline=label_color_map[
        #     det_labels[i]])  # a third rectangle at an offset of 2 pixels to increase line thickness
        # draw.rectangle(xy=[l + 3. for l in box_location], outline=label_color_map[
        #     det_labels[i]])  # a fourth rectangle at an offset of 3 pixels to increase line thickness
        # Text
        text_size = font.getsize(det_labels[i].upper())
        text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
        textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,
                            box_location[1]]
        draw.rectangle(xy=textbox_location, fill=label_color_map[det_labels[i]])
        draw.text(xy=text_location, text=det_labels[i].upper(), fill='white',
                  font=font)
    del draw
    return annotated_image
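To build some intuition for the max_overlap argument used above: non-maximum suppression keeps the highest-scoring box for a class and discards any remaining box whose IoU with an already-kept box exceeds that threshold. The tutorial implements this itself inside SSD300.detect_objects in model.py; the snippet below is only a standalone illustration of the same idea using torchvision's built-in operator and made-up boxes.
# Standalone NMS illustration (made-up boxes); not the tutorial's own implementation.
import torch
from torchvision.ops import nms
boxes = torch.tensor([[10., 10., 100., 100.],     # box 0
                      [12., 12., 102., 102.],     # box 1: nearly identical to box 0
                      [200., 200., 300., 300.]])  # box 2: far away from the others
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms(boxes, scores, iou_threshold=0.45)  # same threshold as max_overlap in evaluate()
print(keep)  # tensor([0, 2]) -- the lower-scoring near-duplicate (box 1) is suppressed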
relevant_images = [
'000012.jpg', # Car
'000014.jpg', # Car, Bus
'000026.jpg', # Car
'000038.jpg', # Cyclist
'000054.jpg', # Bus
'000091.jpg', # Vehicles parked, far from camera
'000111.jpg', # Cyclists in race, far from camera
'000129.jpg' # Cyclists in race, close to camera
]
for rel_img_file_name in relevant_images:
    img_path = '/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4/VOCdevkit/VOC2007/JPEGImages/' + rel_img_file_name
    original_image = Image.open(img_path, mode='r')
    original_image = original_image.convert('RGB')
    img = detect(original_image, min_score=0.2, max_overlap=0.5, top_k=200)
    fig = plt.figure(figsize=(10, 10))
    ax1 = fig.add_subplot(1, 1, 1)
    ax1.imshow(img)
Loaded checkpoint from epoch 17.
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:183: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.) /usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:185: UserWarning: indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead. (Triggered internally at /pytorch/aten/src/ATen/native/IndexingUtils.h:25.)